3.17. Robot Simulation
In [1]:
import jyro.simulator as jy
import random
import numpy as np
In [2]:
# Build a simulated Pioneer robot at pose (x=3.5, y=2, heading=0).
robot = jy.Pioneer("Pioneer", 3.5, 2, 0)
# Ring of 16 sonar range sensors.
robot.addDevice(jy.Pioneer16Sonars())
# Depth camera device — presumably 4 is its sensing range/depth parameter; TODO confirm against jyro docs.
robot.addDevice(jy.DepthCamera(4))
# Front light sensors; reading mode set to 'ambient' before attaching.
light_sensors = jy.PioneerFrontLightSensors(3.0)
light_sensors.lightMode = 'ambient'
robot.addDevice(light_sensors)
Out[2]:
In [3]:
def worldf(physics):
    """Build the simulated world: a 4x4 walled box containing one light source."""
    physics.addBox(0, 0, 4, 4, fill="backgroundgreen", wallcolor="gray")
    # Light at (2, 0.75); brightness 1.0 was increased for the new linear light model.
    physics.addLight(2, 0.75, 1.0)
In [4]:
sim = jy.VSimulator(robot, worldf)
In [5]:
camera = robot.device["camera"]
In [6]:
# Render the current camera view; the bare expression displays the image inline.
image = camera.getImage()
image
Out[6]:
In [7]:
image.size
Out[7]:
(60, 40)
In [8]:
# Raw pixel array: shape (40, 60, 3) = (rows, cols, channels),
# the transpose of the PIL (width, height) = (60, 40) above.
data = camera.getData()
data.shape
Out[8]:
(40, 60, 3)
In [9]:
robot.move(0.50, 0.35)
In [10]:
sim.step()
In [11]:
# Rebuild the robot, this time with an ordinary color camera instead of the depth camera.
robot = jy.Pioneer("Pioneer", 3.5, 2, 0)
robot.addDevice(jy.Pioneer16Sonars())
robot.addDevice(jy.Camera())
light_sensors = jy.PioneerFrontLightSensors(3.0)
light_sensors.lightMode = 'ambient'
robot.addDevice(light_sensors)
Out[11]:
In [12]:
sim = jy.VSimulator(robot, worldf)
In [13]:
# The color camera registers under the same device name as the depth camera did.
camera = robot.device["camera"]
image = camera.getImage()
image
Out[13]:
In [14]:
# Same (40, 60, 3) array layout as the depth camera produced.
data = camera.getData()
data.shape
Out[14]:
(40, 60, 3)
In [15]:
def random_action():
    """Draw a random [translate, rotate] action, each value from a small discrete set."""
    options = [-1.0, -0.5, 0.0, 0.5, 1.0]
    # Two independent draws: one for translation, one for rotation.
    return [random.choice(options) for _ in range(2)]
def get_senses(robot):
    """Collect the robot's sensor readings as [light, sonar, camera].

    Sonar values are divided by 3.0 to roughly normalize the ranges.
    """
    light_data = robot["light"].getData()
    sonar_scaled = [reading / 3.0 for reading in robot["sonar"].getData()]
    camera_data = robot["camera"].getData()
    return [light_data, sonar_scaled, camera_data]
In [16]:
# Sanity check the sense vector sizes: 2 light values, 16 sonars, 40 camera rows.
senses = get_senses(robot)
list(map(len, senses))
Out[16]:
[2, 16, 40]
In [17]:
robot.history = []

def brain(robot):
    """Per-step controller: propagate senses through the net, log the pose, drive forward.

    A random action is drawn but deliberately not acted on for now;
    the robot always moves with fixed translate/rotate values.
    """
    net.propagate(get_senses(robot))
    translate, rotate = random_action()
    # robot.move(translate, rotate)  # enable to act on the random policy instead
    robot.history.append(robot.getPose())
    robot.move(0.50, 0.35)
In [18]:
robot.brain = brain
In [19]:
import conx as cx
Using Theano backend.
Conx, version 3.6.0
In [20]:
# Multi-input, multi-output network: light + sonar + camera in, two output heads.
net = cx.Network("Robot Prediction Network")
net.add(cx.Layer("light", 2),                         # 2 ambient light readings
        cx.Layer("sonar", 16),                        # 16 scaled sonar readings
        cx.ImageLayer("camera", (40,60), 3),          # 40x60 RGB camera input
        cx.FlattenLayer("flatten"),                   # flattens conv output below
        cx.Conv2DLayer("conv", 16, (3,3)),            # 16 filters of size 3x3
        cx.Layer("hidden", 50, activation="relu"),
        cx.Layer("output1", 2, activation="sigmoid"),
        cx.Layer("hidden2", 5, activation="sigmoid"),
        cx.Layer("hidden3", 10, activation="sigmoid", dropout=0.25),
        cx.Layer("hidden4", 10, activation="sigmoid"),
        cx.Layer("output2", 5, activation="sigmoid"))
Out[20]:
'output2'
In [21]:
# Wire the graph: camera -> conv -> flatten joins sonar and light/hidden at hidden2,
# which feeds hidden3 and from there both output heads.
net.connect("sonar", "hidden2")
net.connect("light", "hidden")
net.connect("camera", "conv")
net.connect("conv", "flatten")
net.connect("flatten", "hidden2")
net.connect("hidden", "hidden2")
net.connect("hidden2", "hidden3")
##net.connect("hidden2", "output2")
net.connect("hidden3", "output2")
net.connect("hidden3", "hidden4")
net.connect("hidden4", "output1")
In [22]:
net.compile(error="mean_squared_error", optimizer="adam")
In [23]:
net.picture()
Out[23]:
In [24]:
matrix = net.propagate_to("conv", get_senses(robot))
In [25]:
net["conv"].feature = 6
In [26]:
net.propagate_to_features("conv", get_senses(robot), scale=3)
Out[26]:
In [27]:
net.dataset.append([[0] * 2, [0] * 16, data], [[0] * 2, [1] + ([0] * 4)])
In [28]:
net.dashboard()
In [29]:
net.picture()
Out[29]:
In [30]:
net.test()
========================================================
Testing validation dataset with tolerance 0.1...
Total count: 1
correct: 0
incorrect: 1
Total percentage correct: 0.0
In [31]:
# Clear any saved network state and re-initialize the weights.
net.delete()
net.reset()
In [32]:
# Load previously trained weights if they were saved; otherwise train and save.
if net.saved():
    net.load()
    net.plot_results()
else:
    net.train(epochs=200)
    net.save()
========================================================
| Training | output1 | output2
Epochs | Error | acc | acc
------ | --------- | --------- | ---------
# 200 | 0.08392 | 0.00000 | 0.00000
In [33]:
net.plot("all")
In [34]:
net.test(show=True)
========================================================
Testing validation dataset with tolerance 0.1...
# | inputs | targets | outputs | result
---------------------------------------
0 | [[0.00,0.00],[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00, 0.00],[[[0.68,0.85,0.90], [0.68,0.85,0.90], [0.68,0.85,0.90], ..., [0.68,0.85,0.90], [0.68,0.85,0.90], [0.68,0.85,0.90]], [[0.68,0.85,0.90], [0.68,0.85,0.90], [0.68,0.85,0.90], ..., [0.68,0.85,0.90], [0.68,0.85,0.90], [0.68,0.85,0.90]], [[0.68,0.85,0.90], [0.68,0.85,0.90], [0.68,0.85,0.90], ..., [0.68,0.85,0.90], [0.68,0.85,0.90], [0.68,0.85,0.90]], ..., [[0.93,0.95,0.87], [0.93,0.95,0.87], [0.93,0.95,0.87], ..., [0.93,0.95,0.87], [0.93,0.95,0.87], [0.93,0.95,0.87]], [[0.93,0.95,0.87], [0.93,0.95,0.87], [0.93,0.95,0.87], ..., [0.93,0.95,0.87], [0.93,0.95,0.87], [0.93,0.95,0.87]], [[0.93,0.95,0.87], [0.93,0.95,0.87], [0.93,0.95,0.87], ..., [0.93,0.95,0.87], [0.93,0.95,0.87], [0.93,0.95,0.87]]]] | [[0.00,0.00],[1.00,0.00,0.00,0.00,0.00]] | [[0.22,0.18],[0.80,0.27,0.23,0.16,0.34]] | X
Total count: 1
correct: 0
incorrect: 1
Total percentage correct: 0.0
In [35]:
# Run the simulation forward 100 steps; brain() appends the pose each step.
for _ in range(100):
    sim.step()
In [36]:
def function(simulator, index):
    """Playback frame builder: the rendered world view plus the camera image at 4x size."""
    camera_view = simulator.get_image()
    width, height = camera_view.size
    world_view = simulator.canvas.render("pil")
    return (world_view, camera_view.resize((width * 4, height * 4)))
In [37]:
sim.playback(robot.history, function)
In [38]:
def function(simulator, index):
    """Movie frame builder: return just the rendered world view as a PIL image.

    Fix: the original also called simulator.get_image() and discarded the
    result, performing a wasted camera render on every movie frame; that
    dead call has been removed.
    """
    return simulator.canvas.render("pil")
In [39]:
sim.movie(robot.history, function, movie_name="sim-robot.gif")
Out[39]: